#
# MDSMAX  mdsmax(fun, x0, STOPIT, SAVIT) attempts to maximize the
# function fun, using the starting vector x0. The method of
# multi-directional search is used.
# The result is returned as a list with members:
# x = vector yielding the largest function value found,
# fmax = function value at x,
# nf = number of function evaluations.
# The iteration is terminated when either
# - the relative size of the simplex is <= STOPIT(1)
# (default 1e-3),
# - STOPIT(2) function evaluations have been performed
# (default inf, i.e., no limit), or
# - a function value equals or exceeds STOPIT(3)
# (default inf, i.e., no test on function values).
# The form of the initial simplex is determined by STOPIT(4):
# STOPIT(4) = 0: regular simplex (sides of equal length, the default)
# STOPIT(4) = 1: right-angled simplex.
# Progress of the iteration is not shown if STOPIT(5) = 0 (default 1).
# If a non-empty fourth parameter string SAVIT is present, then
# write (SAVIT, x, fmax, nf) is executed after each outer iteration,
# saving the current best point to the file named by SAVIT.
# NB: x0 can be a matrix. In the returned x, in SAVIT saves, and in
# calls to fun, x has the same shape as x0.
# This implementation uses 2n elements of storage (two simplices), where x0
# is an n-vector. It is based on the algorithm statement in [2, sec.3],
# modified so as to halve the storage (with a slight loss in readability).
# References:
# [1] V.J. Torczon, Multi-directional search: A direct search algorithm for
# parallel machines, Ph.D. Thesis, Rice University, Houston, Texas, 1989.
# [2] V.J. Torczon, On the convergence of the multi-directional search
# algorithm, SIAM J. Optimization, 1 (1991), pp. 123-145.
# [3] N.J. Higham, Optimization by direct search in matrix computations,
# Numerical Analysis Report No. 197, University of Manchester, UK, 1991;
# to appear in SIAM J. Matrix Anal. Appl., 14 (2), April 1993.
# By Nick Higham, Department of Mathematics, University of Manchester, UK.
# na.nhigham@na-net.ornl.gov
# July 27, 1991.
#
# Translated to RLaB by Ian Searle,
# February 1994.
#
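#
# Example (an illustrative sketch, not part of the original file): the
# function negdist and the target vector c below are invented purely for
# demonstration; any scalar-valued RLaB function of x can be maximized.
#
#   c = [1; 2; 3];
#   negdist = function (x) { global (c); d = x[:] - c; return -sum (d .* d); };
#   out = mdsmax (negdist, zeros (3, 1));
#   out.x     # best point found (same shape as x0)
#   out.fmax  # function value at out.x
#   out.nf    # number of function evaluations
#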
# Dependencies
require rem
mdsmax = function (fun, X, stopit, savit)
{
global (eps)
x = X; # Copy input
n = prod(size(x));
x0 = x[:]; # Work with column vector internally.
mu = 2; # Expansion factor.
theta = 0.5; # Contraction factor.
# Set up convergence parameters etc.
if (!exist(stopit)) { stopit[1] = 1e-3; }
tol = stopit[1]; # Tolerance for cgce test based on relative size of simplex.
if (max(size(stopit)) == 1) { stopit[2] = inf(); } # Max no. of f-evaluations.
if (max(size(stopit)) == 2) { stopit[3] = inf(); } # Default target for f-values.
if (max(size(stopit)) == 3) { stopit[4] = 0; } # Default initial simplex.
if (max(size(stopit)) == 4) { stopit[5] = 1; } # Default: show progress.
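# For illustration (not from the original file): stopit = [1e-4; 500; inf(); 1; 0]
# would request a tighter tolerance, at most 500 function evaluations, no target
# function value, a right-angled initial simplex, and no progress output.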
trace = stopit[5];
if (!exist(savit)) { savit = []; } # File name for snapshots.
V = [zeros(n,1), eye(n,n)]; T = V;
f = zeros(n+1,1); ft = f;
V[;1] = x0;
x = reshape (x0, x.nr, x.nc);
f[1] = fun (x);
fmax_old = f[1];
if (trace) { printf("f(x0) = %9.4e\n", f[1]); }
k = 0; m = 0;
# Set up initial simplex.
scale = max([norm(x0,"i"),1]);
if (stopit[4] == 0)
{
# Regular simplex - all edges have same length.
# Generated from construction given in reference [18, pp. 80-81] of [1].
alpha = scale / (n*sqrt(2)) * [ sqrt(n+1)-1+n, sqrt(n+1)-1 ];
V[;2:n+1] = (x0 + alpha[2]*ones(n,1)) * ones(1,n);
for (j in 2:n+1)
{
V[j-1;j] = x0[j-1] + alpha[1];
x = reshape (V[;j], x.nr, x.nc);
f[j] = fun (x);
}
else
# Right-angled simplex based on co-ordinate axes.
alpha = scale*ones(n+1,1);
for (j in 2:n+1)
{
V[;j] = x0 + alpha[j]*V[;j];
x = reshape (V[;j], x.nr, x.nc);
f[j] = fun (x);
}
}
nf = n+1;
msize = 0; # Integer that keeps track of expansions/contractions.
flag_break = 0; # Flag which becomes true when ready to quit outer loop.
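# Each outer iteration moves the current best vertex into column 1 of V, then
# the inner loop reflects the other n vertices through it (rotation step).
# If the rotation improves on fmax, an expansion (factor mu) is also tried and
# the better of the two new simplices is kept; otherwise the simplex is
# contracted toward the best vertex (factor theta) and the inner loop repeats
# until some vertex beats fmax or a stopping test is triggered.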
while (1) ###### Outer loop.
{
k = k+1;
# Find a new best vertex x and function value fmax = f(x).
fmax = max (f); j = maxi (f);
V[;1,j] = V[;j,1];
v1 = V[;1];
if (!isempty(savit)) { x = reshape (v1, x.nr, x.nc); write (savit, x, fmax, nf); }
f[1,j] = f[j,1];
if (trace)
{
printf("Iter. %2.0f, inner = %2.0f, size = %2.0f, ", k, m, msize);
printf("nf = %3.0f, f = %9.4e (%2.1f)\n", nf, fmax, ...
100*(fmax-fmax_old)/(abs(fmax_old)+eps));
}
fmax_old = fmax;
# Stopping Test 1 - f reached target value?
if (fmax >= stopit[3])
{
msg = "Exceeded target...quitting\n";
break # Quit.
}
m = 0;
while (1) ### Inner repeat loop.
{
m = m+1;
# Stopping Test 2 - too many f-evals?
if (nf >= stopit[2])
{
msg = "Max no. of function evaluations exceeded...quitting\n";
flag_break = 1;
break # Quit.
}
# Stopping Test 3 - converged? This is test (4.3) in [1].
size_simplex = norm(V[;2:n+1] - v1[;ones(1,n)],"1") / max([1, norm(v1,"1")]);
if (size_simplex <= tol)
{
sprintf(msg, "Simplex size %9.4e <= %9.4e...quitting\n", ...
size_simplex, tol);
flag_break = 1;
break # Quit.
}
for (j in 2:n+1) # ---Rotation (reflection) step.
{
T[;j] = 2*v1 - V[;j];
x = reshape (T[;j], x.nr, x.nc);
ft[j] = fun (x);
}
nf = nf + n;
replaced = ( max(ft[2:n+1]) > fmax );
if (replaced)
{
for (j in 2:n+1) # ---Expansion step.
{
V[;j] = (1-mu)*v1 + mu*T[;j];
x = reshape (V[;j], x.nr, x.nc);
f[j] = fun (x);
}
nf = nf + n;
# Accept expansion or rotation?
if (max(ft[2:n+1]) > max(f[2:n+1]))
{
V[;2:n+1] = T[;2:n+1];
f[2:n+1] = ft[2:n+1]; # Accept rotation.
else
msize = msize + 1; # Accept expansion (f and V already set).
}
else
for (j in 2:n+1) # ---Contraction step.
{
V[;j] = (1+theta)*v1 - theta*T[;j];
x = reshape (V[;j], x.nr, x.nc);
f[j] = fun (x);
}
nf = nf + n;
replaced = ( max(f[2:n+1]) > fmax );
# Accept contraction (f and V already set).
msize = msize - 1;
}
if (replaced) { break }
if (trace && rem(m,10) == 0)
{
printf(" ...inner = %2.0f...\n",m);
}
} ### Of inner repeat loop.
if (flag_break) { break }
} ###### Of outer loop.
# Finished.
if (trace) { printf(msg); }
x = reshape (v1, x.nr, x.nc);
return << x = x; fmax = fmax; nf = nf>>;
};